return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
}
+/*
+ * Propagate this vcpu's CR3 into the VMCS HOST_CR3 field, so the
+ * correct page tables are restored on VM exit.
+ *
+ * Works only for vcpu == current: __vmwrite operates on the VMCS
+ * currently loaded on this physical CPU, which belongs to the
+ * running vcpu — hence the ASSERT below.
+ */
+static void vmx_update_host_cr3(struct vcpu *v)
+{
+ ASSERT(v == current);
+ __vmwrite(HOST_CR3, v->arch.cr3);
+}
+
static void vmx_inject_exception(unsigned int trapnr, int errcode)
{
vmx_inject_hw_exception(current, trapnr, errcode);
return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
}
-static inline u32
+static u32
shadow_l1_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
#endif
}
-static inline u32
+static u32
shadow_l2_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
#if GUEST_PAGING_LEVELS >= 4
-static inline u32
+static u32
shadow_l3_index(mfn_t *smfn, u32 guest_index)
{
return guest_index;
}
-static inline u32
+static u32
shadow_l4_index(mfn_t *smfn, u32 guest_index)
{
return guest_index;
return efer & EFER_LME;
}
-/* Works only for vcpu == current */
-static inline void vmx_update_host_cr3(struct vcpu *v)
-{
- ASSERT(v == current);
- __vmwrite(HOST_CR3, v->arch.cr3);
-}
-
static inline int vmx_pgbit_test(struct vcpu *v)
{
unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;